From 17281aea1a9a10f1ee165c6e6a2921a67b7b1df2 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Fri, 22 Feb 2013 11:21:38 +0100
Subject: [PATCH] x86/nhvm: properly clean up after failure to set up all vCPU-s

Otherwise we may leak memory when setting up nHVM fails half way.

This implies that the individual destroy functions will have to remain
capable (in the VMX case they first need to be made so, following
26486:7648ef657fe7 and 26489:83a3fa9c8434) of being called for a vCPU
that the corresponding init function was never run on.

Once at it, also remove a redundant check from the corresponding
parameter validation code.

Signed-off-by: Jan Beulich
Acked-by: Tim Deegan
Tested-by: Olaf Hering
---
 xen/arch/x86/hvm/hvm.c       |  8 +++++---
 xen/arch/x86/hvm/nestedhvm.c |  2 +-
 xen/arch/x86/hvm/vmx/vvmx.c  | 30 ++++++++++++++++--------------
 3 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index febbffb982..ea7adf6780 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3918,18 +3918,20 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             }
             if ( a.value > 1 )
                 rc = -EINVAL;
-            if ( !is_hvm_domain(d) )
-                rc = -EINVAL;
             /* Remove the check below once we have
              * shadow-on-shadow.
              */
             if ( cpu_has_svm && !paging_mode_hap(d) && a.value )
                 rc = -EINVAL;
             /* Set up NHVM state for any vcpus that are already up */
-            if ( !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+            if ( a.value &&
+                 !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
                 for_each_vcpu(d, v)
                     if ( rc == 0 )
                         rc = nestedhvm_vcpu_initialise(v);
+            if ( !a.value || rc )
+                for_each_vcpu(d, v)
+                    nestedhvm_vcpu_destroy(v);
             break;
         case HVM_PARAM_BUFIOREQ_EVTCHN:
             rc = -EINVAL;
diff --git a/xen/arch/x86/hvm/nestedhvm.c b/xen/arch/x86/hvm/nestedhvm.c
index b3cf07dda3..964f58f1a5 100644
--- a/xen/arch/x86/hvm/nestedhvm.c
+++ b/xen/arch/x86/hvm/nestedhvm.c
@@ -87,7 +87,7 @@ nestedhvm_vcpu_initialise(struct vcpu *v)
 void
 nestedhvm_vcpu_destroy(struct vcpu *v)
 {
-    if ( nestedhvm_enabled(v->domain) && hvm_funcs.nhvm_vcpu_destroy )
+    if ( hvm_funcs.nhvm_vcpu_destroy )
         hvm_funcs.nhvm_vcpu_destroy(v);
 }
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 4f3f94d3c7..bb7688ff6c 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -62,7 +62,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !nvcpu->nv_n2vmcx )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
-        goto out;
+        return -ENOMEM;
     }
 
     /* non-root VMREAD/VMWRITE bitmap.
      */
@@ -75,7 +75,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !vmread_bitmap )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for vmread bitmap failed\n");
-        goto out1;
+        return -ENOMEM;
     }
     v->arch.hvm_vmx.vmread_bitmap = vmread_bitmap;
 
@@ -83,7 +83,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     if ( !vmwrite_bitmap )
     {
         gdprintk(XENLOG_ERR, "nest: allocation for vmwrite bitmap failed\n");
-        goto out2;
+        return -ENOMEM;
     }
     v->arch.hvm_vmx.vmwrite_bitmap = vmwrite_bitmap;
 
@@ -118,12 +118,6 @@ int nvmx_vcpu_initialise(struct vcpu *v)
     nvmx->msrbitmap = NULL;
     INIT_LIST_HEAD(&nvmx->launched_list);
     return 0;
-out2:
-    free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
-out1:
-    free_xenheap_page(nvcpu->nv_n2vmcx);
-out:
-    return -ENOMEM;
 }
 
 void nvmx_vcpu_destroy(struct vcpu *v)
@@ -147,16 +141,24 @@ void nvmx_vcpu_destroy(struct vcpu *v)
         nvcpu->nv_n2vmcx = NULL;
     }
 
-    list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
-    {
-        list_del(&item->node);
-        xfree(item);
-    }
+    /* Must also cope with nvmx_vcpu_initialise() not having got called. */
+    if ( nvmx->launched_list.next )
+        list_for_each_entry_safe(item, n, &nvmx->launched_list, node)
+        {
+            list_del(&item->node);
+            xfree(item);
+        }
 
     if ( v->arch.hvm_vmx.vmread_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmread_bitmap);
+        v->arch.hvm_vmx.vmread_bitmap = NULL;
+    }
     if ( v->arch.hvm_vmx.vmwrite_bitmap )
+    {
         free_domheap_page(v->arch.hvm_vmx.vmwrite_bitmap);
+        v->arch.hvm_vmx.vmwrite_bitmap = NULL;
+    }
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
-- 
2.30.2